# Import packages
import pandas as pd
import numpy as np
import os
# Path layout: the notebook runs from a working directory whose parent holds
# sibling "etc" (point metadata) and "data" (per-building CSVs) folders.
# NOTE(review): "BATH_PATH" looks like a typo for "BASE_PATH", but the name is
# used throughout the notebook — rename everywhere at once if fixing.
BATH_PATH = os.getcwd()
ETC_PATH = os.path.join(BATH_PATH, "..", "etc")
DATA_PATH = os.path.join(BATH_PATH, "..", "data")
# Master metadata table covering every RTEM point across all buildings.
Meta_table_file = os.path.join(ETC_PATH, "rtem_all_points_metadata.csv")
# Valid Building Data
# 247
# 354
# 383
# 394
# 438
# Read Meta Table
# Load the full point-metadata table for interactive inspection only;
# the extraction helper below re-reads the file itself on every call.
Meta_table_DF = pd.read_csv(Meta_table_file)
Meta_table_DF.head()
| id_point | building_id | last_updated | first_updated | device | objectId | name_point | description | units | tagged_units | ... | measurement_id | datasource_hash | topic | equip_id | id_bdg | name_bdg | equip_type_abbr | equip_type_name | area_served_desc | tags | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 310029 | 441 | 1.560571e+12 | 1.550542e+12 | NaN | NaN | 245 E 149th St. Boiler-1 Fault Code | NaN | ? | ? | ... | 18 | b115b5053493748f7076ad09f69c4456 | nyserda+0000088737+boiler-1+245 E 149th St. Bo... | 28797 | 441 | 88737 | BLR | Boiler | NaN | ['boiler', 'hvac'] |
| 1 | 310009 | 441 | 1.560485e+12 | 1.550701e+12 | NaN | NaN | 225 E 149th St. Boiler-1 Exhaust Temp | NaN | F | f | ... | 1 | 4a1a11502acee3078352da312c40c262 | nyserda+0000088737+boiler-1+225 E 149th St. Bo... | 28797 | 441 | 88737 | BLR | Boiler | NaN | ['boiler', 'hvac'] |
| 2 | 310010 | 441 | 1.560571e+12 | 1.550504e+12 | NaN | NaN | 225 E 149th St. Boiler-1 Fault Code | NaN | ? | ? | ... | 18 | 55882633fe8fb5aac7dac643c99b1af5 | nyserda+0000088737+boiler-1+225 E 149th St. Bo... | 28797 | 441 | 88737 | BLR | Boiler | NaN | ['boiler', 'hvac'] |
| 3 | 310011 | 441 | 1.560485e+12 | 1.550504e+12 | NaN | NaN | 225 E 149th St. Boiler-1 FFWD Temp | NaN | ? | ? | ... | 18 | 831a8df09b848f53e807fa0204c45eaa | nyserda+0000088737+boiler-1+225 E 149th St. Bo... | 28797 | 441 | 88737 | BLR | Boiler | NaN | ['boiler', 'hvac'] |
| 4 | 310012 | 441 | 1.560485e+12 | 1.550504e+12 | NaN | NaN | 225 E 149th St. Boiler-1 Fire Rate In | NaN | ? | ? | ... | 18 | aa147c829c7ed55b6e93cdbbb8b43d55 | nyserda+0000088737+boiler-1+225 E 149th St. Bo... | 28797 | 441 | 88737 | BLR | Boiler | NaN | ['boiler', 'hvac'] |
5 rows × 24 columns
# Extract Necessary meta of building
def extract_necessary_meta(building_id, meta_file=None):
    """Return the trimmed metadata rows for one building.

    Parameters
    ----------
    building_id : int
        Value matched against the ``building_id`` column of the metadata CSV.
    meta_file : str, optional
        Path to the metadata CSV.  Defaults to the module-level
        ``Meta_table_file`` (kept for backward compatibility).

    Returns
    -------
    pandas.DataFrame
        One row per point of the building, restricted to the columns the
        downstream notebook cells need.
    """
    if meta_file is None:
        meta_file = Meta_table_file
    meta_df = pd.read_csv(meta_file)
    # Fill gaps from the secondary columns: tagged_units backs up units,
    # description backs up name_point.
    meta_df["units"] = meta_df["units"].fillna(meta_df["tagged_units"])
    meta_df["name_point"] = meta_df["name_point"].fillna(meta_df["description"])
    # Keep only the columns used by the analysis cells.
    select_cols = ["id_point", "building_id", "name_point", "description", "units", "tagged_units", "equip_id", "equip_type_abbr", "equip_type_name", "tags"]
    return meta_df[meta_df["building_id"] == building_id][select_cols]
# Demo: preview the trimmed metadata for building 488.
building_id = 488
extract_necessary_meta(building_id)
| id_point | building_id | name_point | description | units | tagged_units | equip_id | equip_type_abbr | equip_type_name | tags | |
|---|---|---|---|---|---|---|---|---|---|---|
| 5677 | 379788 | 488 | dagPowerMeasurement_Chiller 1A | NaN | kw | kw | 33139 | CH | Chiller | ['chiller', 'hvac'] |
| 5678 | 379787 | 488 | dagChillerAmperageMeasurement_Chiller 1A | NaN | a | a | 33139 | CH | Chiller | ['chiller', 'hvac'] |
| 5679 | 379786 | 488 | dagChilledWaterSupplyMeasurement_Chiller 1A | NaN | f | f | 33139 | CH | Chiller | ['chiller', 'hvac'] |
| 5680 | 379785 | 488 | dagChilledWaterReturnMeasurement_Chiller 1A | NaN | f | f | 33139 | CH | Chiller | ['chiller', 'hvac'] |
| 5681 | 379792 | 488 | dagPowerMeasurement_Chiller 1B | NaN | kw | kw | 33140 | CH | Chiller | ['chiller', 'hvac'] |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 5779 | 379881 | 488 | dagSupplyFanStatusMeasurement_Floors 23-41 Per... | NaN | on/off | on/off | 33176 | FAN | Fan | ['fan', 'hvac', 'supplyFan'] |
| 5780 | 379880 | 488 | dagSupplyAirFanSpeedMeasurement_Floors 23-41 P... | NaN | rpm | rpm | 33176 | FAN | Fan | ['fan', 'hvac', 'supplyFan'] |
| 5781 | 379884 | 488 | dagSupplyFanStatusMeasurement_Floors 42-49 Per... | NaN | on/off | on/off | 33177 | FAN | Fan | ['fan', 'hvac', 'supplyFan'] |
| 5782 | 379883 | 488 | dagSupplyAirFanSpeedMeasurement_Floors 42-49 P... | NaN | rpm | rpm | 33177 | FAN | Fan | ['fan', 'hvac', 'supplyFan'] |
| 5783 | 379882 | 488 | dagSupplyAirFanSpeedMeasurement_Floors 42-49 P... | NaN | % | % | 33177 | FAN | Fan | ['fan', 'hvac', 'supplyFan'] |
107 rows × 10 columns
# Save meta for building
def save_meta_building(building_id):
    """Write one building's trimmed metadata to its data folder.

    Saves ``Meta_Building_<id>.csv`` under ``DATA_PATH/Building_<id>`` and
    prints a success/failure message instead of raising, so a batch run over
    many buildings keeps going.  Returns None.
    """
    DF = extract_necessary_meta(building_id)
    DF.reset_index(drop=True, inplace=True)
    try:
        DF.to_csv(os.path.join(DATA_PATH, F"Building_{building_id}", F"Meta_Building_{building_id}.csv"))
        print(F"Meta_Building_{building_id}.csv saved!")
    except OSError:
        # Most likely the Building_<id> folder does not exist.  The original
        # bare `except:` also hid programming errors; OSError covers the
        # intended "cannot write file" case only.
        print(F"Meta_Building_{building_id}.csv saving failed!")
building_id = 488
save_meta_building(building_id)
Meta_Building_488.csv saved!
# Check Building Numbers in DATA_PATH & Save meta for buildings
# Derive building ids from folder names like "Building_488".  Skip anything
# else (stray files such as ".DS_Store") so the int() parse cannot crash.
Building_Number_List = [int(n.split("_")[1]) for n in os.listdir(DATA_PATH) if n.startswith("Building_")]
for bid in Building_Number_List:
    save_meta_building(bid)
Meta_Building_103.csv saved! Meta_Building_119.csv saved! Meta_Building_120.csv saved! Meta_Building_141.csv saved! Meta_Building_194.csv saved! Meta_Building_247.csv saved! Meta_Building_259.csv saved! Meta_Building_332.csv saved! Meta_Building_354.csv saved! Meta_Building_383.csv saved! Meta_Building_387.csv saved! Meta_Building_393.csv saved! Meta_Building_394.csv saved! Meta_Building_418.csv saved! Meta_Building_438.csv saved! Meta_Building_450.csv saved! Meta_Building_484.csv saved! Meta_Building_485.csv saved! Meta_Building_488.csv saved! Meta_Building_503.csv saved! Meta_Building_98.csv saved!
# Merge Building Data
def merge_building_data(building_id):
    """Merge every raw API CSV of one building into a single wide frame.

    Reads each "*rtem_API_data*" file under ``DATA_PATH/Building_<id>``,
    aligns them on the ``timestamp`` column (outer join, so non-overlapping
    timestamps become NaN) and coerces every column to a numeric dtype.

    Returns
    -------
    pandas.DataFrame
        DatetimeIndex (parsed from ``timestamp``), one column per point id.
    """
    Building_Data_path = os.path.join(DATA_PATH, F"Building_{building_id}")
    Building_Data_List = [device for device in os.listdir(Building_Data_path) if "rtem_API_data" in device]
    data_list = []
    for device in Building_Data_List:
        # Reuse the already-built folder path instead of re-joining it.
        temp_data = pd.read_csv(os.path.join(Building_Data_path, device))
        temp_data.set_index("timestamp", inplace=True)
        data_list.append(temp_data)
    res_DF = pd.concat(data_list, axis=1, join="outer")
    res_DF.index = pd.to_datetime(res_DF.index)
    # apply() rebuilds each column, which reliably changes the dtype; the old
    # per-column `.loc[:, col] = ...` assignment could silently keep object
    # dtype (and warns under pandas copy-on-write).
    return res_DF.apply(pd.to_numeric)
# Demo: merge building 438's raw CSVs and show the combined table.
building_id = 438
merged_data = merge_building_data(building_id)
display(merged_data)
| 308439 | 308007 | 308008 | 308009 | 308011 | 307611 | 308010 | 307602 | 307608 | 307605 | ... | 308573 | 308574 | 308575 | 308576 | 308577 | 308718 | 308717 | 308720 | 308719 | 308721 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| timestamp | |||||||||||||||||||||
| 2020-05-01 04:00:00+00:00 | NaN | 99.0 | 0 | 1 | 1 | 0.00 | 62.0 | 0 | 70.65 | 70.01 | ... | 0 | 1 | 1 | 0 | 0 | 46.8 | 1.0 | 44.6 | 1.0 | 1 |
| 2020-05-01 04:15:00+00:00 | NaN | 100.0 | 0 | 1 | 1 | 0.00 | 62.0 | 0 | 70.88 | 70.08 | ... | 0 | 1 | 1 | 0 | 0 | 46.8 | 1.0 | 44.6 | 1.0 | 1 |
| 2020-05-01 04:30:00+00:00 | NaN | 99.0 | 0 | 1 | 1 | 0.00 | 62.0 | 0 | 71.05 | 70.13 | ... | 0 | 1 | 1 | 0 | 0 | 46.8 | 1.0 | 44.6 | 1.0 | 1 |
| 2020-05-01 04:45:00+00:00 | NaN | 100.0 | 0 | 1 | 1 | 0.00 | 62.0 | 0 | 71.20 | 70.17 | ... | 0 | 1 | 1 | 0 | 0 | 46.8 | 1.0 | 44.6 | 1.0 | 1 |
| 2020-05-01 05:00:00+00:00 | NaN | 100.0 | 0 | 1 | 1 | 0.00 | 62.0 | 0 | 71.35 | 70.23 | ... | 0 | 1 | 1 | 0 | 0 | 46.8 | 1.0 | 44.6 | 1.0 | 1 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 2020-10-31 23:00:00+00:00 | NaN | 0.0 | 0 | 0 | 0 | 60.30 | 62.0 | 0 | 64.00 | 66.66 | ... | 0 | 1 | 1 | 0 | 0 | 47.3 | 1.0 | 0.0 | 0.0 | 0 |
| 2020-10-31 23:15:00+00:00 | NaN | 0.0 | 0 | 0 | 0 | 60.38 | 62.0 | 0 | 66.43 | 66.61 | ... | 0 | 1 | 1 | 0 | 0 | 47.3 | 1.0 | 0.0 | 0.0 | 0 |
| 2020-10-31 23:30:00+00:00 | NaN | 0.0 | 0 | 0 | 0 | 60.31 | 62.0 | 0 | 64.25 | 66.53 | ... | 0 | 1 | 1 | 0 | 0 | 47.3 | 1.0 | 0.0 | 0.0 | 0 |
| 2020-10-31 23:45:00+00:00 | NaN | 0.0 | 0 | 0 | 0 | 60.49 | 62.0 | 0 | 64.51 | 66.61 | ... | 0 | 1 | 1 | 0 | 0 | 47.3 | 1.0 | 0.0 | 0.0 | 0 |
| 2020-11-01 00:00:00+00:00 | NaN | 0.0 | 0 | 0 | 0 | 60.44 | 62.0 | 0 | 66.30 | 66.55 | ... | 0 | 1 | 1 | 0 | 0 | 47.3 | 1.0 | 0.0 | 0.0 | 0 |
17645 rows × 1111 columns
# Select columns and standardize column names
def standardize_column_names(building_id):
    """Rename the merged point columns and convert units to metric.

    Looks up the building's Point_Table workbook to map raw point ids onto
    standardized names, then converts Fahrenheit columns to Celsius and
    GPM columns to LPH.  Returns the renamed, unit-converted DataFrame.
    """
    building_dir = os.path.join(DATA_PATH, F"Building_{building_id}")
    # The first workbook whose filename contains "Point_Table" is the map
    # from raw Point_IDs to standardized names and units.
    candidates = [fname for fname in os.listdir(building_dir) if "Point_Table" in fname]
    point_table = pd.read_excel(os.path.join(building_dir, candidates[0]))
    point_table.sort_values(by="Point_ID", inplace=True)
    wanted_ids = [str(pid) for pid in point_table.loc[:, "Point_ID"]]
    data = merge_building_data(building_id).loc[:, wanted_ids]
    data.columns = list(point_table.loc[:, "Standardized_Point_Name"])
    for row in point_table.index:
        std_name = point_table.loc[row, "Standardized_Point_Name"]
        unit = point_table.loc[row, "Unit"]
        if unit in ("F", "f"):
            # Fahrenheit -> Celsius
            data.loc[:, std_name] = (data.loc[:, std_name].values - 32) * 5 / 9
        if unit in ("GPM", "gpm"):  # GPM to LPH
            # 1 gal/min = 227.1 L/h
            data.loc[:, std_name] = (data.loc[:, std_name].values) * 227.1
    return data
# Demo: standardize building 438 and persist the merged table.
building_id = 438
merged_data = standardize_column_names(building_id)
display(merged_data.head(5))
OUTPUT_DATA_PATH = os.path.join(BATH_PATH, "..", "data", F"Building_{building_id}", "output_data")
# Create the output folder up front — to_csv does not create directories,
# so a fresh checkout without "output_data" would otherwise fail here.
os.makedirs(OUTPUT_DATA_PATH, exist_ok=True)
merged_data.to_csv(os.path.join(OUTPUT_DATA_PATH, F"merged_data_{building_id}.csv"))
print(F"merged_data_{building_id} data saved!")
| CHP_22_3-CHP_KWH | CHP_22_3-CHP_A | CHP_22_3-CHP_KW | CHP_22_3-CHP_VFD | CHP_22_4-CHP_KWH | CHP_22_4-CHP_A | CHP_22_4-CHP_KW | CHP_22_4-CHP_VFD | CH_22_3-QCH | CH_22_3-CH_KWH | ... | CH_22_4-TCWS | CH_22_4-TCHS | CH_22_4-TCHS_SP | CH_22_4-IPCT | CH_22_4-CH_KW | CT_1-TCWR_1 | CT_2-TCWR_2 | CT_3-TCWR_3 | CT_4-TCWR_4 | CT_5-TCWR_5 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| timestamp | |||||||||||||||||||||
| 2020-05-01 04:00:00+00:00 | 867.7 | 33.4 | 5.3 | 20.6 | 681.4 | 33.0 | 4.5 | 20.9 | 283.875 | 3539610.0 | ... | 17.572222 | 13.694444 | 4.444444 | 0.0 | 0.0 | 13.777778 | 12.166667 | 12.277778 | 12.444444 | 12.333333 |
| 2020-05-01 04:15:00+00:00 | 867.7 | 33.4 | 4.9 | 20.6 | 681.4 | 33.2 | 4.4 | 20.9 | 283.875 | 3539610.0 | ... | 17.466667 | 13.505556 | 4.444444 | 0.0 | 0.0 | 13.277778 | 12.055556 | 12.166667 | 12.333333 | 12.111111 |
| 2020-05-01 04:30:00+00:00 | 867.7 | 33.4 | 4.9 | 20.6 | 681.4 | 33.1 | 4.4 | 20.9 | 283.875 | 3539610.0 | ... | 17.444444 | 13.511111 | 4.444444 | 0.0 | 0.0 | 13.000000 | 11.888889 | 12.111111 | 12.222222 | 12.000000 |
| 2020-05-01 04:45:00+00:00 | 867.7 | 33.4 | 5.1 | 20.6 | 681.4 | 33.1 | 4.3 | 20.9 | 283.875 | 3539610.0 | ... | 17.372222 | 13.538889 | 4.444444 | 0.0 | 0.0 | 13.000000 | 11.944444 | 12.166667 | 12.222222 | 12.055556 |
| 2020-05-01 05:00:00+00:00 | 867.7 | 33.4 | 4.9 | 20.6 | 681.4 | 33.2 | 4.5 | 20.9 | 283.875 | 3539610.0 | ... | 17.316667 | 13.511111 | 4.444444 | 0.0 | 0.0 | 13.055556 | 11.944444 | 12.166667 | 12.222222 | 12.000000 |
5 rows × 29 columns
merged_data_438 data saved!
# Split Building Device
def split_building_device(building_id):
    """Split the standardized building data into per-chiller frames.

    For each device row of the Point_Table workbook's "Device_class" sheet:
    selects matching columns by keyword regex, strips the "<device>-" prefix
    from column names, derives performance metrics (TCHD, RT, KW/RT, COP)
    and masks physically implausible values before dropping NaN rows.

    Returns
    -------
    dict[str, pandas.DataFrame]
        Maps Device_Name -> cleaned chiller data.
    """
    Building_Data_path = os.path.join(DATA_PATH, F"Building_{building_id}")
    Point_table_file = [point_table for point_table in os.listdir(Building_Data_path) if "Point_Table" in point_table]
    Device_Table = pd.read_excel(os.path.join(Building_Data_path, Point_table_file[0]), sheet_name="Device_class")
    # Keywords are ";"-separated in the sheet; turn each into a regex alternation.
    Device_Table["Keywords"] = [keyword.replace(";", "|") for keyword in Device_Table["Keywords"]]
    data = standardize_column_names(building_id)
    # Classify Points by Device_Name
    Chiller_dictionary = {}
    # Plausibility bounds per derived/measured column; out-of-range values
    # become NaN and the whole row is dropped at the end.
    filter_Table = pd.DataFrame.from_dict({"TCHD_cal": [0, 10], "RT_cal": [0, 50000], "CH_KW": [0, 50000], "KWRT_cal": [0, 3], "COP_cal": [0, 10]}, orient="index", columns=["Filter_bottom", "Filter_top"])
    for idx in Device_Table.index:
        chiller_name = Device_Table.loc[idx, "Device_Name"]
        chiller_data_cols = data.columns.str.contains(Device_Table.loc[idx, "Keywords"], regex=True)
        chiller_data = data.loc[:, chiller_data_cols].copy()
        # Strip the "<device>-" prefix; columns without a "-" keep their name.
        # (Replaces the original bare try/except around split("-")[1].)
        chiller_data.rename(columns={col: col.split("-")[1] for col in chiller_data.columns if "-" in col}, inplace=True)
        chiller_data.drop(["CH_KWH"], axis=1, inplace=True)
        # Process chiller_data: derived metrics.
        # TCHD: chilled-water delta-T; RT: cooling load in USRT
        # (flow assumed in LPH — TODO confirm against QCH unit conversion).
        TCHD = chiller_data.loc[:, "TCHR"].values - chiller_data.loc[:, "TCHS"].values
        RT_cal = chiller_data.loc[:, "QCH"].values * TCHD / 3024  # USRT
        KWRT_cal = chiller_data.loc[:, "CH_KW"] / RT_cal
        COP_cal = 3.516 / KWRT_cal  # 3.516 kW per USRT
        chiller_data.loc[:, "TCHD_cal"] = TCHD
        chiller_data.loc[:, "RT_cal"] = RT_cal
        chiller_data.loc[:, "KWRT_cal"] = KWRT_cal
        chiller_data.loc[:, "COP_cal"] = COP_cal
        # Mask values outside the plausibility bounds (inf from divide-by-zero
        # RT is also caught by the upper-bound comparison).
        for col in chiller_data.columns:
            if col in filter_Table.index:
                out_of_range = (chiller_data.loc[:, col] > filter_Table.loc[col, "Filter_top"]) | (chiller_data.loc[:, col] < filter_Table.loc[col, "Filter_bottom"])
                chiller_data.loc[:, col] = np.where(out_of_range, np.nan, chiller_data.loc[:, col])
        Chiller_dictionary[chiller_name] = chiller_data.dropna()
    return Chiller_dictionary
# Split building 438 into per-chiller frames and save a sample of each.
Chillers = split_building_device(building_id)
OUTPUT_DATA_PATH = os.path.join(BATH_PATH, "..", "data", F"Building_{building_id}", "output_data")
for chiller in Chillers.keys():
    # NOTE(review): only the first 5 rows are persisted here — confirm the
    # full per-chiller frame is not meant to be written out.
    temp_df = Chillers[chiller].head(5)
    display(temp_df.head(5))
    temp_df.to_csv(os.path.join(OUTPUT_DATA_PATH, F"{chiller}_data.csv"))
    print(F"{chiller} data saved!")
| CHP_KWH | CHP_A | CHP_KW | CHP_VFD | QCH | TCHR | TCWS | TCHS | TCHS_SP | IPCT | CH_KW | TCWR_1 | TCWR_2 | TCWR_3 | TCWR_4 | TCWR_5 | TCHD_cal | RT_cal | KWRT_cal | COP_cal | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| timestamp | ||||||||||||||||||||
| 2020-05-01 11:00:00+00:00 | 867.7 | 36.2 | 10.6 | 27.4 | 488405.802 | 8.622222 | 23.922222 | 3.911111 | 4.444444 | 1086.01 | 579.57 | 13.166667 | 15.722222 | 18.944444 | 15.888889 | 18.333333 | 4.711111 | 760.890873 | 0.761699 | 4.615995 |
| 2020-05-01 11:15:00+00:00 | 867.7 | 36.2 | 10.0 | 27.4 | 469813.125 | 7.955556 | 23.505556 | 3.222222 | 4.444444 | 1042.88 | 557.03 | 13.222222 | 15.277778 | 19.055556 | 15.722222 | 18.388889 | 4.733333 | 735.377687 | 0.757475 | 4.641739 |
| 2020-05-01 11:30:00+00:00 | 867.7 | 36.0 | 10.5 | 27.4 | 479889.552 | 7.461111 | 21.405556 | 2.688889 | 4.444444 | 898.14 | 483.28 | 13.277778 | 15.555556 | 18.222222 | 18.388889 | 17.111111 | 4.772222 | 757.321291 | 0.638144 | 5.509729 |
| 2020-05-01 11:45:00+00:00 | 867.7 | 36.0 | 10.5 | 27.4 | 479181.000 | 7.088889 | 23.005556 | 2.627778 | 4.444444 | 830.20 | 456.86 | 13.444444 | 21.722222 | 18.333333 | 18.388889 | 21.833333 | 4.461111 | 706.904657 | 0.646282 | 5.440347 |
| 2020-05-01 12:00:00+00:00 | 867.7 | 36.0 | 9.6 | 27.4 | 470664.750 | 6.761111 | 25.183333 | 2.672222 | 4.444444 | 765.71 | 431.78 | 13.611111 | 23.333333 | 18.722222 | 18.333333 | 23.388889 | 4.088889 | 636.407363 | 0.678465 | 5.182288 |
CH_22_3 data saved!
| CHP_KWH | CHP_A | CHP_KW | CHP_VFD | QCH | TCHR | TCWS | TCHS | TCHS_SP | IPCT | CH_KW | TCWR_1 | TCWR_2 | TCWR_3 | TCWR_4 | TCWR_5 | TCHD_cal | RT_cal | KWRT_cal | COP_cal | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| timestamp | ||||||||||||||||||||
| 2020-07-20 15:30:00+00:00 | 706.0 | 58.1 | 47.7 | 49.2 | 841830.177 | 9.672222 | 30.161111 | 5.850000 | 4.444444 | 898.24 | 483.32 | 23.166667 | 23.611111 | 23.222222 | 23.222222 | 23.777778 | 3.822222 | 1064.041670 | 0.454230 | 7.740566 |
| 2020-07-20 15:45:00+00:00 | 706.0 | 65.1 | 58.2 | 52.8 | 659298.552 | 10.372222 | 29.161111 | 5.938889 | 4.444444 | 776.45 | 435.95 | 23.166667 | 23.611111 | 23.111111 | 23.222222 | 23.833333 | 4.433333 | 966.564235 | 0.451031 | 7.795481 |
| 2020-07-20 16:00:00+00:00 | 706.0 | 44.7 | 25.0 | 39.3 | 485994.000 | 12.072222 | 29.100000 | 6.200000 | 4.444444 | 908.74 | 488.47 | 23.277778 | 23.666667 | 23.333333 | 23.277778 | 23.888889 | 5.872222 | 943.738349 | 0.517590 | 6.793015 |
| 2020-07-20 16:15:00+00:00 | 706.0 | 49.1 | 32.3 | 39.5 | 521903.052 | 12.188889 | 29.761111 | 6.133333 | 4.444444 | 921.81 | 495.15 | 23.500000 | 23.777778 | 23.611111 | 23.611111 | 24.166667 | 6.055556 | 1045.110095 | 0.473778 | 7.421200 |
| 2020-07-20 16:30:00+00:00 | 706.0 | 46.8 | 28.4 | 39.5 | 500471.625 | 11.933333 | 29.844444 | 5.827778 | 4.444444 | 936.06 | 502.43 | 23.722222 | 23.722222 | 23.833333 | 23.500000 | 23.944444 | 6.105556 | 1010.468687 | 0.497225 | 7.071250 |
CH_22_4 data saved!
# Plot Figures
import plotly.graph_objects as go
import chart_studio.plotly as py
import cufflinks as cf
import seaborn as sns
import matplotlib.pyplot as plt
# Enable cufflinks offline mode so plots render without a Chart Studio account.
cf.go_offline()
# RT vs KWRT
# Collect each chiller's frame (tagged with its name) for later analysis.
Load_Data_List = []
for chiller in Chillers.keys():
    temp_df = Chillers[chiller]
    # NOTE(review): this writes into the frame stored in Chillers (no copy),
    # so the dict's frames gain a "Chiller_Name" column as a side effect.
    temp_df.loc[:, "Chiller_Name"] = chiller
    print(F"{chiller} data read: ")
    display(temp_df.head(5))
    Load_Data_List.append(temp_df)
# Plot
# NOTE(review): temp_df/chiller below hold the LAST loop values, so only the
# final chiller is plotted — confirm whether one figure per chiller was intended.
Fig = go.Figure()
Fig.add_trace(go.Scatter(name="RT-KWRT", x=temp_df["RT_cal"], y=temp_df["KWRT_cal"], mode="markers"))
Fig.update_layout(
    title = dict(text=F"Cooling Load vs KW/RT (Chiller: {chiller})", x=0.04, y=0.83),
    xaxis = dict(
        title=dict(text="Cooling Load (RT)", font=dict(size=12), standoff=0),
        showline=True, linewidth=1.2, linecolor="black",
        showticklabels=True, showgrid=True, gridcolor="rgba(230, 230, 230, 1)",
        rangeslider=dict(visible=False)),
    yaxis = dict(
        title=dict(text="Energy Performance (KW/RT)", font=dict(size=12), standoff=0),
        dtick=0.1,
        showline=True, linewidth=1.2, linecolor="black",
        zeroline=True, zerolinewidth=1.2, zerolinecolor="rgba(230, 230, 230, 1)",
        showgrid=True, gridcolor="rgba(230, 230, 230, 1)"),
    legend = dict(x=1.02, y=0.5, orientation="v", bordercolor="black", borderwidth=0.5, font=dict(size=8)),
    plot_bgcolor="white", width=700, height=400)
Fig.show()
CH_22_3 data read:
| CHP_KWH | CHP_A | CHP_KW | CHP_VFD | QCH | TCHR | TCWS | TCHS | TCHS_SP | IPCT | ... | TCWR_1 | TCWR_2 | TCWR_3 | TCWR_4 | TCWR_5 | TCHD_cal | RT_cal | KWRT_cal | COP_cal | Chiller_Name | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| timestamp | |||||||||||||||||||||
| 2020-05-01 11:00:00+00:00 | 867.7 | 36.2 | 10.6 | 27.4 | 488405.802 | 8.622222 | 23.922222 | 3.911111 | 4.444444 | 1086.01 | ... | 13.166667 | 15.722222 | 18.944444 | 15.888889 | 18.333333 | 4.711111 | 760.890873 | 0.761699 | 4.615995 | CH_22_3 |
| 2020-05-01 11:15:00+00:00 | 867.7 | 36.2 | 10.0 | 27.4 | 469813.125 | 7.955556 | 23.505556 | 3.222222 | 4.444444 | 1042.88 | ... | 13.222222 | 15.277778 | 19.055556 | 15.722222 | 18.388889 | 4.733333 | 735.377687 | 0.757475 | 4.641739 | CH_22_3 |
| 2020-05-01 11:30:00+00:00 | 867.7 | 36.0 | 10.5 | 27.4 | 479889.552 | 7.461111 | 21.405556 | 2.688889 | 4.444444 | 898.14 | ... | 13.277778 | 15.555556 | 18.222222 | 18.388889 | 17.111111 | 4.772222 | 757.321291 | 0.638144 | 5.509729 | CH_22_3 |
| 2020-05-01 11:45:00+00:00 | 867.7 | 36.0 | 10.5 | 27.4 | 479181.000 | 7.088889 | 23.005556 | 2.627778 | 4.444444 | 830.20 | ... | 13.444444 | 21.722222 | 18.333333 | 18.388889 | 21.833333 | 4.461111 | 706.904657 | 0.646282 | 5.440347 | CH_22_3 |
| 2020-05-01 12:00:00+00:00 | 867.7 | 36.0 | 9.6 | 27.4 | 470664.750 | 6.761111 | 25.183333 | 2.672222 | 4.444444 | 765.71 | ... | 13.611111 | 23.333333 | 18.722222 | 18.333333 | 23.388889 | 4.088889 | 636.407363 | 0.678465 | 5.182288 | CH_22_3 |
5 rows × 21 columns
CH_22_4 data read:
| CHP_KWH | CHP_A | CHP_KW | CHP_VFD | QCH | TCHR | TCWS | TCHS | TCHS_SP | IPCT | ... | TCWR_1 | TCWR_2 | TCWR_3 | TCWR_4 | TCWR_5 | TCHD_cal | RT_cal | KWRT_cal | COP_cal | Chiller_Name | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| timestamp | |||||||||||||||||||||
| 2020-07-20 15:30:00+00:00 | 706.0 | 58.1 | 47.7 | 49.2 | 841830.177 | 9.672222 | 30.161111 | 5.850000 | 4.444444 | 898.24 | ... | 23.166667 | 23.611111 | 23.222222 | 23.222222 | 23.777778 | 3.822222 | 1064.041670 | 0.454230 | 7.740566 | CH_22_4 |
| 2020-07-20 15:45:00+00:00 | 706.0 | 65.1 | 58.2 | 52.8 | 659298.552 | 10.372222 | 29.161111 | 5.938889 | 4.444444 | 776.45 | ... | 23.166667 | 23.611111 | 23.111111 | 23.222222 | 23.833333 | 4.433333 | 966.564235 | 0.451031 | 7.795481 | CH_22_4 |
| 2020-07-20 16:00:00+00:00 | 706.0 | 44.7 | 25.0 | 39.3 | 485994.000 | 12.072222 | 29.100000 | 6.200000 | 4.444444 | 908.74 | ... | 23.277778 | 23.666667 | 23.333333 | 23.277778 | 23.888889 | 5.872222 | 943.738349 | 0.517590 | 6.793015 | CH_22_4 |
| 2020-07-20 16:15:00+00:00 | 706.0 | 49.1 | 32.3 | 39.5 | 521903.052 | 12.188889 | 29.761111 | 6.133333 | 4.444444 | 921.81 | ... | 23.500000 | 23.777778 | 23.611111 | 23.611111 | 24.166667 | 6.055556 | 1045.110095 | 0.473778 | 7.421200 | CH_22_4 |
| 2020-07-20 16:30:00+00:00 | 706.0 | 46.8 | 28.4 | 39.5 | 500471.625 | 11.933333 | 29.844444 | 5.827778 | 4.444444 | 936.06 | ... | 23.722222 | 23.722222 | 23.833333 | 23.500000 | 23.944444 | 6.105556 | 1010.468687 | 0.497225 | 7.071250 | CH_22_4 |
5 rows × 21 columns
# Plot Timeseries & Analysis Table
# Side-by-side concat keeps duplicate column labels, so selecting "RT_cal"
# returns one column per chiller; the row-wise sum is the plant-total load.
Total_Load_cal = pd.concat(Load_Data_List, axis=1, join="outer")
Total_Load_cal.loc[:, "RT_total"] = Total_Load_cal["RT_cal"].sum(axis=1)
# Long (stacked) layout for per-chiller filtering in the plot loop below.
Load_Data_DF = pd.concat(Load_Data_List, axis=0)
Chiller_List = list(Load_Data_DF.loc[:, "Chiller_Name"].unique())
Fig = go.Figure()
for chiller in Chiller_List:
    Fig.add_trace(go.Scatter(name=chiller, x=Load_Data_DF[Load_Data_DF["Chiller_Name"]==chiller].index, y=Load_Data_DF[Load_Data_DF["Chiller_Name"]==chiller]["RT_cal"]))
Fig.add_trace(go.Scatter(name="RT_total", x=Total_Load_cal.index, y=Total_Load_cal["RT_total"]))
Fig.update_layout(
    title = dict(text=F"Cooling Load Timeseries", x=0.04, y=0.83),
    xaxis = dict(
        showline=True, linewidth=1.2, linecolor="black",
        showticklabels=True, showgrid=True, gridcolor="rgba(230, 230, 230, 1)",
        rangeslider=dict(visible=False)),
    yaxis = dict(
        title=dict(text="Cooling Load (RT)", font=dict(size=12), standoff=0),
        showline=True, linewidth=1.2, linecolor="black",
        zeroline=True, zerolinewidth=1.2, zerolinecolor="rgba(230, 230, 230, 1)",
        showgrid=True, gridcolor="rgba(230, 230, 230, 1)"),
    legend = dict(x=1.02, y=0.5, orientation="v", bordercolor="black", borderwidth=0.5, font=dict(size=8)),
    plot_bgcolor="white", width=700, height=400)
Fig.show()
# TCH Analysis
# Move the timestamp index into a regular column so pivot_table can use it.
# (Later analysis cells rely on Load_Data_DF staying in this state.)
Load_Data_DF.reset_index(drop=False, inplace=True)
TCH_analysis = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["TCHS", "TCHR"]).describe().round(2)
print("Chilled Water Analysis")
display(TCH_analysis)
DF = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["TCHS", "TCHR"])
x_targ = DF.index
fig = go.Figure()
# NOTE(review): the color lists cover two chillers; zip() silently drops any
# additional chillers from the plot — confirm two is the intended maximum.
for Chiller_name, color_thick, color_thin in zip(Chiller_List, ["#2CBDFE", "#00cc99"], ["#80d6fe", "#b3ffec"]):
    try:
        y_targ1 = DF["TCHS"][Chiller_name]
        y_targ2 = DF["TCHR"][Chiller_name]
        fig.add_trace(go.Scatter(name=F"{Chiller_name}-TCHS", x=x_targ, y=y_targ1, mode="lines", line=dict(color=color_thick), showlegend=True))
        fig.add_trace(go.Scatter(name=F"{Chiller_name}-TCHR", x=x_targ, y=y_targ2, mode="lines", line=dict(color=color_thin), showlegend=True))
    except:
        # NOTE(review): bare except also hides real errors, not just missing data.
        print(F"{Chiller_name} Data is not sufficient to plot...\n")
fig.update_layout(
    title = dict(text=F"TCH Timeseries", x=0.04, y=0.83),
    xaxis = dict(
        showline=True, linewidth=1.2, linecolor="black",
        showticklabels=True, showgrid=True, gridcolor="rgba(230, 230, 230, 1)",
        rangeslider=dict(visible=False)),
    yaxis = dict(
        title=dict(text="Temp. (<sup>o</sup>C)", font=dict(size=12), standoff=0),
        showline=True, linewidth=1.2, linecolor="black",
        zeroline=True, zerolinewidth=1.2, zerolinecolor="rgba(230, 230, 230, 1)",
        showgrid=True, gridcolor="rgba(230, 230, 230, 1)"),
    legend = dict(x=1.02, y=0.5, orientation="v", bordercolor="black", borderwidth=0.5, font=dict(size=8)),
    plot_bgcolor="white", width=700, height=400)
fig.show()
Chilled Water Analysis
| TCHR | TCHS | |||
|---|---|---|---|---|
| Chiller_Name | CH_22_3 | CH_22_4 | CH_22_3 | CH_22_4 |
| count | 12542.00 | 578.00 | 12542.00 | 578.00 |
| mean | 9.89 | 11.05 | 5.74 | 5.73 |
| std | 1.65 | 0.66 | 1.09 | 0.23 |
| min | 4.84 | 9.51 | 2.29 | 4.87 |
| 25% | 8.83 | 10.63 | 5.31 | 5.63 |
| 50% | 10.07 | 11.06 | 5.66 | 5.72 |
| 75% | 11.32 | 11.54 | 6.59 | 5.83 |
| max | 15.42 | 12.96 | 9.38 | 7.04 |
# QCH Analysis
# NOTE: timestamp is already a column (index was reset in the TCH section);
# resetting again here would only inject a junk "index" column into
# Load_Data_DF, so the redundant reset_index call has been removed.
QCH_analysis = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["QCH"]).describe().round(2)
print("QCH Analysis")
display(QCH_analysis)
DF = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["QCH"])
x_targ = DF.index
fig = go.Figure()
for Chiller_name, color_thick, color_thin in zip(Chiller_List, ["#2CBDFE", "#00cc99"], ["#80d6fe", "#b3ffec"]):
    try:
        y_targ1 = DF["QCH"][Chiller_name]
        fig.add_trace(go.Scatter(name=F"{Chiller_name}-QCH", x=x_targ, y=y_targ1, mode="lines", line=dict(color=color_thick), showlegend=True))
    except KeyError:
        # A chiller with no QCH data is missing from the pivot's columns.
        print(F"{Chiller_name} Data is not sufficient to plot...\n")
fig.update_layout(
    title = dict(text=F"QCH Timeseries", x=0.04, y=0.83),
    xaxis = dict(
        showline=True, linewidth=1.2, linecolor="black",
        showticklabels=True, showgrid=True, gridcolor="rgba(230, 230, 230, 1)",
        rangeslider=dict(visible=False)),
    yaxis = dict(
        title=dict(text="QCH (LPH)", font=dict(size=12), standoff=0),
        showline=True, linewidth=1.2, linecolor="black",
        zeroline=True, zerolinewidth=1.2, zerolinecolor="rgba(230, 230, 230, 1)",
        showgrid=True, gridcolor="rgba(230, 230, 230, 1)"),
    legend = dict(x=1.02, y=0.5, orientation="v", bordercolor="black", borderwidth=0.5, font=dict(size=8)),
    plot_bgcolor="white", width=700, height=400)
fig.show()
QCH Analysis
| QCH | ||
|---|---|---|
| Chiller_Name | CH_22_3 | CH_22_4 |
| count | 12542.00 | 578.00 |
| mean | 724934.26 | 784500.48 |
| std | 137417.94 | 169188.01 |
| min | 421127.43 | 436456.68 |
| 25% | 620266.88 | 778101.38 |
| 50% | 731402.80 | 852688.96 |
| 75% | 828488.05 | 904035.14 |
| max | 1196817.00 | 1081704.55 |
# TCW Analysis
# Average the five cooling-tower return sensors into a single TCWR series
# (mutates Load_Data_DF in place; the pivot below picks the new column up).
Load_Data_DF.loc[:, "TCWR"] = Load_Data_DF.loc[:, ["TCWR_1", "TCWR_2", "TCWR_3", "TCWR_4", "TCWR_5"]].mean(axis=1)
TCW_analysis = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["TCWS", "TCWR"]).describe().round(2)
print("Cooling Water Analysis")
display(TCW_analysis)
DF = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["TCWS", "TCWR"])
x_targ = DF.index
fig = go.Figure()
for Chiller_name, color_thick, color_thin in zip(Chiller_List, ["#2CBDFE", "#00cc99"], ["#80d6fe", "#b3ffec"]):
    try:
        y_targ1 = DF["TCWS"][Chiller_name]
        y_targ2 = DF["TCWR"][Chiller_name]
        fig.add_trace(go.Scatter(name=F"{Chiller_name}-TCWS", x=x_targ, y=y_targ1, mode="lines", line=dict(color=color_thick), showlegend=True))
        fig.add_trace(go.Scatter(name=F"{Chiller_name}-TCWR", x=x_targ, y=y_targ2, mode="lines", line=dict(color=color_thin), showlegend=True))
    except:
        # NOTE(review): bare except also hides real errors, not just missing data.
        print(F"{Chiller_name} Data is not sufficient to plot...\n")
fig.update_layout(
    title = dict(text=F"TCW Timeseries", x=0.04, y=0.83),
    xaxis = dict(
        showline=True, linewidth=1.2, linecolor="black",
        showticklabels=True, showgrid=True, gridcolor="rgba(230, 230, 230, 1)",
        rangeslider=dict(visible=False)),
    yaxis = dict(
        title=dict(text="Temp. (<sup>o</sup>C)", font=dict(size=12), standoff=0),
        showline=True, linewidth=1.2, linecolor="black",
        zeroline=True, zerolinewidth=1.2, zerolinecolor="rgba(230, 230, 230, 1)",
        showgrid=True, gridcolor="rgba(230, 230, 230, 1)"),
    legend = dict(x=1.02, y=0.5, orientation="v", bordercolor="black", borderwidth=0.5, font=dict(size=8)),
    plot_bgcolor="white", width=700, height=400)
fig.show()
Cooling Water Analysis
| TCWR | TCWS | |||
|---|---|---|---|---|
| Chiller_Name | CH_22_3 | CH_22_4 | CH_22_3 | CH_22_4 |
| count | 12542.00 | 578.00 | 12542.00 | 578.00 |
| mean | 21.46 | 21.60 | 27.14 | 26.76 |
| std | 1.60 | 1.26 | 1.83 | 1.78 |
| min | 9.26 | 19.41 | 15.12 | 23.19 |
| 25% | 20.71 | 20.62 | 25.96 | 25.38 |
| 50% | 21.58 | 21.46 | 27.16 | 26.59 |
| 75% | 22.41 | 22.69 | 28.42 | 28.05 |
| max | 26.50 | 24.51 | 33.26 | 31.88 |
# RT Analysis
# Summarize per-chiller cooling load, then plot each chiller's timeseries.
RT_analysis = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["RT_cal"]).describe().round(2)
print("Cooling Load Analysis")
display(RT_analysis)
rt_pivot = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["RT_cal"])
timestamps = rt_pivot.index
fig = go.Figure()
for Chiller_name, line_color, _alt_color in zip(Chiller_List, ["#2CBDFE", "#00cc99"], ["#80d6fe", "#b3ffec"]):
    try:
        series = rt_pivot["RT_cal"][Chiller_name]
        fig.add_trace(go.Scatter(name=F"{Chiller_name}-RT", x=timestamps, y=series, mode="lines", line=dict(color=line_color), showlegend=True))
    except:
        print(F"{Chiller_name} Data is not sufficient to plot...\n")
# Shared axis styling, applied to both axes.
axis_common = dict(showline=True, linewidth=1.2, linecolor="black", showgrid=True, gridcolor="rgba(230, 230, 230, 1)")
fig.update_layout(
    title=dict(text=F"Cooling Load Timeseries", x=0.04, y=0.83),
    xaxis=dict(showticklabels=True, rangeslider=dict(visible=False), **axis_common),
    yaxis=dict(title=dict(text="Cooling Load (USRT)", font=dict(size=12), standoff=0), zeroline=True, zerolinewidth=1.2, zerolinecolor="rgba(230, 230, 230, 1)", **axis_common),
    legend=dict(x=1.02, y=0.5, orientation="v", bordercolor="black", borderwidth=0.5, font=dict(size=8)),
    plot_bgcolor="white", width=700, height=400)
fig.show()
Cooling Load Analysis
| RT_cal | ||
|---|---|---|
| Chiller_Name | CH_22_3 | CH_22_4 |
| count | 12542.00 | 578.00 |
| mean | 1014.86 | 1362.25 |
| std | 347.59 | 289.81 |
| min | 114.88 | 826.04 |
| 25% | 720.33 | 1085.38 |
| 50% | 959.34 | 1398.41 |
| 75% | 1284.17 | 1589.03 |
| max | 2335.06 | 2132.13 |
# KWRT Analysis
# Per-chiller efficiency (kW per ton of refrigeration): summary stats + timeseries.
KWRT_analysis = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["KWRT_cal"]).describe().round(2)
print("Chiller Performance Analysis")
display(KWRT_analysis)
DF = Load_Data_DF.pivot_table(index="timestamp", columns="Chiller_Name", values=["KWRT_cal"])
x_targ = DF.index
fig = go.Figure()
for Chiller_name, color_thick, color_thin in zip(Chiller_List, ["#2CBDFE", "#00cc99"], ["#80d6fe", "#b3ffec"]):
    try:
        y_targ1 = DF["KWRT_cal"][Chiller_name]
        fig.add_trace(go.Scatter(name=F"{Chiller_name}-KWRT", x=x_targ, y=y_targ1, mode="lines", line=dict(color=color_thick), showlegend=True))
    except:
        # NOTE(review): bare except also hides real errors, not just missing data.
        print(F"{Chiller_name} Data is not sufficient to plot...\n")
fig.update_layout(
    title = dict(text=F"Chiller Performance Timeseries", x=0.04, y=0.83),
    xaxis = dict(
        showline=True, linewidth=1.2, linecolor="black",
        showticklabels=True, showgrid=True, gridcolor="rgba(230, 230, 230, 1)",
        rangeslider=dict(visible=False)),
    yaxis = dict(
        title=dict(text="Chiller Performance (KW/RT)", font=dict(size=12), standoff=0),
        showline=True, linewidth=1.2, linecolor="black",
        zeroline=True, zerolinewidth=1.2, zerolinecolor="rgba(230, 230, 230, 1)",
        showgrid=True, gridcolor="rgba(230, 230, 230, 1)"),
    legend = dict(x=1.02, y=0.5, orientation="v", bordercolor="black", borderwidth=0.5, font=dict(size=8)),
    plot_bgcolor="white", width=700, height=400)
fig.show()
Chiller Performance Analysis
| KWRT_cal | ||
|---|---|---|
| Chiller_Name | CH_22_3 | CH_22_4 |
| count | 12542.00 | 578.00 |
| mean | 0.57 | 0.51 |
| std | 0.05 | 0.03 |
| min | 0.36 | 0.36 |
| 25% | 0.54 | 0.49 |
| 50% | 0.56 | 0.50 |
| 75% | 0.59 | 0.53 |
| max | 1.68 | 0.62 |